import numpy as np
import pickle
import cv2
from os import listdir
from sklearn.preprocessing import LabelBinarizer
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Convolution2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation, Flatten, Dropout, Dense
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import img_to_array
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from PIL import Image, ImageOps
!git clone https://github.com/maxogden/cats.git
!git clone https://github.com/ardamavi/Dog-Cat-Classifier.git
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
from tqdm import tqdm
# --- dataset configuration -------------------------------------------------
# Root of the first cloned repo and the sub-folder(s) to scan for cat photos.
DATADIR = "/content/cats"
CATEGORIES = ["cat_photos"]

# Every image is resized to IMG_SIZE x IMG_SIZE before training.
IMG_SIZE = 100

# Accumulators filled by create_training_data(): images and their class ids
# (0 = dog, 1 = cat).
training_data = []
Labels = []
def create_training_data(class_num=1):
    """Load every image under DATADIR/<category> for each category in CATEGORIES.

    Each readable image is resized to IMG_SIZE x IMG_SIZE and appended to the
    module-level ``training_data`` list along with three augmented variants
    (45-degree rotation, horizontal mirror, rotated mirror); ``Labels`` gets
    one ``class_num`` entry per appended image.

    Parameters
    ----------
    class_num : int, optional
        Label stored for every appended image (0 = dog, 1 = cat).
        Defaults to 1 to match the original hard-coded behaviour.
    """
    def rotated_45(img):
        # Rotate about the image centre, keeping the original frame size
        # (corners are clipped — that matches the original augmentation).
        (h, w) = img.shape[:2]
        M = cv2.getRotationMatrix2D((w // 2, h // 2), 45, 1.0)
        return cv2.warpAffine(img, M, (w, h))

    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        for img in tqdm(os.listdir(path)):
            img_array = cv2.imread(os.path.join(path, img))
            if img_array is None:
                # Not a decodable image (README, corrupt file, ...): skip it
                # explicitly instead of relying on a swallowed exception.
                continue
            try:
                resized = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                mirrored = cv2.resize(cv2.flip(img_array, 1),
                                      (IMG_SIZE, IMG_SIZE))
                # Original order: image, rotated, mirror, rotated mirror.
                for variant in (resized, rotated_45(resized),
                                mirrored, rotated_45(mirrored)):
                    training_data.append(variant)
                    Labels.append(class_num)
            except Exception:
                # Best-effort scan, as in the original: one bad file must not
                # abort the whole pass.
                continue
# Ingest the first cat folder (appends into training_data / Labels).
create_training_data()
print(len(training_data))  # running sample count, including augmentation
# Point the next scan at the second cat folder of the same repo.
CATEGORIES = ["catmapper"]
def create_training_data(class_num=1):
    """Scan DATADIR/<category> for every category in CATEGORIES (cat pass #2).

    Appends each readable image, resized to IMG_SIZE x IMG_SIZE, plus three
    augmented variants (45-degree rotation, horizontal mirror, rotated mirror)
    to the module-level ``training_data`` list, with one ``class_num`` entry
    per image in ``Labels``.

    Parameters
    ----------
    class_num : int, optional
        Label for every appended image (0 = dog, 1 = cat). Defaults to 1,
        matching the original hard-coded value.
    """
    def rotated_45(img):
        # 45-degree rotation about the centre; output keeps the input size.
        (h, w) = img.shape[:2]
        M = cv2.getRotationMatrix2D((w // 2, h // 2), 45, 1.0)
        return cv2.warpAffine(img, M, (w, h))

    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        for img in tqdm(os.listdir(path)):
            img_array = cv2.imread(os.path.join(path, img))
            if img_array is None:
                # Unreadable / non-image file: skip explicitly.
                continue
            try:
                resized = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                mirrored = cv2.resize(cv2.flip(img_array, 1),
                                      (IMG_SIZE, IMG_SIZE))
                for variant in (resized, rotated_45(resized),
                                mirrored, rotated_45(mirrored)):
                    training_data.append(variant)
                    Labels.append(class_num)
            except Exception:
                # Keep the original best-effort semantics.
                continue
# Ingest the second cat folder and report the running sample count.
create_training_data()
print(len(training_data))

# Spot-check a couple of augmented samples (indices 7 and 16, as before).
for example_index in (7, 16):
    plt.imshow(training_data[example_index])
    plt.show()

# Re-point the loader at the dog images from the second cloned repository.
DATADIR = "/content/Dog-Cat-Classifier/Data/Train_Data"
CATEGORIES = ["dog"]
def create_training_data(class_num=0):
    """Scan DATADIR/<category> for every category in CATEGORIES (dog pass).

    Appends each readable image, resized to IMG_SIZE x IMG_SIZE, plus three
    augmented variants (45-degree rotation, horizontal mirror, rotated mirror)
    to the module-level ``training_data`` list, with one ``class_num`` entry
    per image in ``Labels``.

    Parameters
    ----------
    class_num : int, optional
        Label for every appended image (0 = dog, 1 = cat). Defaults to 0,
        matching the original hard-coded dog label.
    """
    def rotated_45(img):
        # 45-degree rotation about the centre; output keeps the input size.
        (h, w) = img.shape[:2]
        M = cv2.getRotationMatrix2D((w // 2, h // 2), 45, 1.0)
        return cv2.warpAffine(img, M, (w, h))

    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        for img in tqdm(os.listdir(path)):
            img_array = cv2.imread(os.path.join(path, img))
            if img_array is None:
                # Unreadable / non-image file: skip explicitly.
                continue
            try:
                resized = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                mirrored = cv2.resize(cv2.flip(img_array, 1),
                                      (IMG_SIZE, IMG_SIZE))
                for variant in (resized, rotated_45(resized),
                                mirrored, rotated_45(mirrored)):
                    training_data.append(variant)
                    Labels.append(class_num)
            except Exception:
                # Keep the original best-effort semantics.
                continue
# Ingest the dog images, then assemble everything into a DataFrame so the
# images and labels can be split and inspected together.
create_training_data()
print(len(training_data))

import pandas as pd

df = pd.DataFrame()
df["image"] = training_data
df["Labels"] = Labels

# Class balance: 0 = dog, 1 = cat.
plt.figure()
plt.bar(['DOG', 'CAT'],
        [len(df[df["Labels"] == 0]), len(df[df["Labels"] == 1])],
        color='r')
df["Labels"].unique()

# Default 75/25 train/test split, seeded for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    df["image"], df["Labels"], random_state=3)
del df  # the splits hold their own data; free the combined frame

# Visualise the split sizes.
plt.figure()
plt.bar(['X_train', 'X_test', 'y_train', 'y_test'],
        [len(X_train), len(X_test), len(y_train), len(y_test)],
        color='y')

# Stack the training images into one tensor; keep labels as a NumPy array.
X = tf.convert_to_tensor(list(X_train.values))
y = y_train.values
X.dtype

# Eyeball the first 20 training samples with their labels.
for exambe in range(20):
    print("class :", y[exambe])
    plt.imshow(X[exambe])
    plt.show()
# NOTE: it looks like we have a fairly poor data set.
# Look at one more training sample, then scale pixel values into [0, 1].
sample_index = 50
plt.imshow(X[sample_index])
plt.show()

X_norm = X / 255
# Small CNN binary classifier: two conv/pool blocks, a dense head, and a
# single sigmoid output (probability of class 1 = cat).
model = Sequential([
    Conv2D(128, (3, 3), input_shape=(IMG_SIZE, IMG_SIZE, 3)),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.4),

    Conv2D(128, (3, 3)),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.35),

    Flatten(),  # 3D feature maps -> 1D feature vector
    Dense(128),
    Dropout(0.4),
    Dense(64),
    Activation('relu'),
    Dropout(0.45),
    Dense(32),
    Activation('relu'),
    Dropout(0.45),
    Dense(1),
    Activation("sigmoid"),
])

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

model.fit(X_norm, y, batch_size=24, epochs=50, validation_split=0.2)
# NOTE: the model overfits, but the validation accuracy is just about
# acceptable; the main cause is most likely the poor data set.
# --- qualitative check on a few held-out samples ---------------------------
X_test_norm = X_test / 255

# Take the first 20 test samples (images plus their true labels).
use_samples = list(range(0, 20))
samples_to_predict = []
samples_stat = []
for sample in use_samples:
    samples_to_predict.append(X_test_norm.iloc[sample])
    samples_stat.append(y_test.iloc[sample])

# Convert to a NumPy array so the model can consume it as one batch.
samples_to_predict = np.array(samples_to_predict)
print(samples_to_predict.shape)

predictions = model.predict(samples_to_predict)

# Show each sample with the model's verdict (sigmoid output > 0.5 => cat).
for sample_image, prediction in zip(samples_to_predict, predictions):
    plt.imshow(sample_image)
    plt.show()
    if prediction > 0.5:
        print("Cat")
    else:
        print("dog")
    print("____________________________________________________")
# --- full test-set evaluation ----------------------------------------------
XTest = tf.convert_to_tensor(list(X_test.values))
predictions_list = model.predict(XTest)
predictions_list

# Round the sigmoid outputs to hard 0/1 labels.
myList = np.rint(predictions_list)
len(myList)

# Collect indices where the rounded prediction disagrees with the truth.
mistakesIndex = []
for i in range(len(myList)):
    if not (myList[i] == y_test.values[i]):
        mistakesIndex.append(i)
len(mistakesIndex)
print('ERROR: ', (len(mistakesIndex) / len(myList)) * 100)

# Show some of the misclassified images.
number_of_ex = 8
print("Note!!")
print("0=>Dog")
print("1=>Cat")
# Guard against having fewer than number_of_ex mistakes.
for i in range(min(number_of_ex, len(mistakesIndex))):
    idx = mistakesIndex[i]
    print("right value:", y_test.values[idx])
    print("Predection:", int(myList[idx]))
    # BUG FIX: the original did plt.imshow(XTest[int(myList[i])]), indexing
    # by the *predicted label* (always image 0 or 1) instead of showing the
    # actual misclassified sample.
    plt.imshow(XTest[idx])
    plt.show()